In smoking_status, ‘Unknown’ should be changed to NA.
It can also be ordered: never < formerly < smokes.
ever_married can be recoded as 0/1, consistent with heart_disease and hypertension.
The other predictors look fine.
df <- read_csv("data/healthcare-dataset-stroke-data.csv", col_types = "cfdfffffddcf", na = c("Unknown", "N/A"))
# if smoking_status is set to factor in col_types, the na argument won't work
df$smoking_status <- as_factor(df$smoking_status)
df$smoking_status <- fct_relevel(df$smoking_status, c("never smoked", "formerly smoked", "smokes"))
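# Sketch, not applied below (skim later reports ordered = FALSE): to encode the
# ordering never < formerly < smokes as a true ordered factor, one could do
# (smoking_ordered is a hypothetical name):
smoking_ordered <- factor(df$smoking_status,
                          levels = c("never smoked", "formerly smoked", "smokes"),
                          ordered = TRUE)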
# recode ever_married as 0/1
df$ever_married <- factor(if_else(df$ever_married == "Yes", 1, 0))
# recode stroke as a labelled factor so that caret models work properly
df$stroke <- factor(ifelse(df$stroke == 1, "yes", "no"), levels = c("no", "yes"))
Skip the id column:
df$id <- NULL
skimr::skim(df)

| Name | df |
|---|---|
| Number of rows | 5110 |
| Number of columns | 11 |
| _______________________ | |
| Column type frequency: | |
| factor | 8 |
| numeric | 3 |
| ________________________ | |
| Group variables | None |
Variable type: factor
| skim_variable | n_missing | complete_rate | ordered | n_unique | top_counts |
|---|---|---|---|---|---|
| gender | 0 | 1.0 | FALSE | 3 | Fem: 2994, Mal: 2115, Oth: 1 |
| hypertension | 0 | 1.0 | FALSE | 2 | 0: 4612, 1: 498 |
| heart_disease | 0 | 1.0 | FALSE | 2 | 0: 4834, 1: 276 |
| ever_married | 0 | 1.0 | FALSE | 2 | 1: 3353, 0: 1757 |
| work_type | 0 | 1.0 | FALSE | 5 | Pri: 2925, Sel: 819, chi: 687, Gov: 657 |
| Residence_type | 0 | 1.0 | FALSE | 2 | Urb: 2596, Rur: 2514 |
| smoking_status | 1544 | 0.7 | FALSE | 3 | nev: 1892, for: 885, smo: 789 |
| stroke | 0 | 1.0 | FALSE | 2 | no: 4861, yes: 249 |
Variable type: numeric
| skim_variable | n_missing | complete_rate | mean | sd | p0 | p25 | p50 | p75 | p100 | hist |
|---|---|---|---|---|---|---|---|---|---|---|
| age | 0 | 1.00 | 43.23 | 22.61 | 0.08 | 25.00 | 45.00 | 61.00 | 82.00 | ▅▆▇▇▆ |
| avg_glucose_level | 0 | 1.00 | 106.15 | 45.28 | 55.12 | 77.24 | 91.88 | 114.09 | 271.74 | ▇▃▁▁▁ |
| bmi | 201 | 0.96 | 28.89 | 7.85 | 10.30 | 23.50 | 28.10 | 33.10 | 97.60 | ▇▇▁▁▁ |
Target ‘stroke’ is imbalanced!
‘smoking_status’ has a completeness rate of only 0.7.
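A quick sketch to quantify the imbalance (the counts should match the skim output above):
df %>% count(stroke) %>% mutate(prop = n / sum(n))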
df %>% group_by(stroke, smoking_status) %>% summarise(N = n())

BMI’s complete rate is 0.96:
df %>% filter(is.na(bmi)) %>% group_by(stroke) %>% summarise(N = n())

One ‘Other’ gender observation is to be removed:
df <- df %>% filter(gender != "Other")

GGally::ggpairs(df, aes(color = stroke, alpha = 0.2, dotsize = 0.02),
upper = list(continuous = GGally::wrap("cor", size = 2.5)),
diag = list(continuous = "barDiag")) +
scale_color_brewer(palette = "Set1", direction = -1) +
scale_fill_brewer(palette = "Set1", direction = -1)

ggplot(df, aes(stroke, age)) +
geom_boxplot(aes(fill = stroke), alpha = 0.5, varwidth = T, notch = T) +
geom_violin(aes(fill = stroke), alpha = 0.5) +
scale_fill_brewer(palette = "Set1", direction = -1) +
xlab("")OBS! There are observation with age much below 20 y.o., even close to 0!
These are very young kids or babies - should we even include them in the analysis?
If so, it will be prediction for adults. Also, stroke in kids probably has very different causes compared to stroke in adults/old folk.
ggplot(df, aes(stroke, age)) +
geom_violin(alpha=0.3) +
geom_jitter(alpha=0.2, size=0.8, width = 0.15, height = 0.1, aes(color = gender)) +
geom_boxplot(alpha = 0.2) +
scale_color_brewer(palette = "Set2", direction = -1)

ggplot(df, aes(stroke, avg_glucose_level)) +
geom_boxplot(aes(fill = stroke), alpha = 0.5, varwidth = T, notch = T) +
geom_violin(aes(fill = stroke), alpha = 0.5) +
scale_fill_brewer(palette = "Set1", direction = -1) +
xlab("")This average glucose level is probably the results of fasting blood sugar test
If I correctly understand this CDC information on diabetes, values greater than 126 is evidence of diabetes. But 250? Is it realistic?
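A quick cross-tab of the diabetic range against the target (a sketch; the 126 mg/dL threshold is the CDC figure cited above):
df %>%
  mutate(diabetic_range = avg_glucose_level > 126) %>%
  count(stroke, diabetic_range)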
ggplot(df, aes(stroke, bmi)) +
geom_boxplot(aes(fill = stroke), alpha = 0.5, varwidth = T, notch = T) +
geom_violin(aes(fill = stroke), alpha = 0.5) +
scale_fill_brewer(palette = "Set1", direction = -1) +
xlab("")BMI over 40 is the 3rd class of obesity - BMI over 75 should not exist at all.
Let’s look at this weird points
facet_names <- c("no" = "no stroke", "yes" = "stroke")
ggplot(df, aes(age, bmi)) +
geom_point(color = "steelblue", alpha = 0.8, size = 0.5) +
scale_fill_brewer(palette = "Set1", direction = -1) +
facet_grid(rows = vars(stroke), labeller = as_labeller(facet_names))

Patients with BMI over 75 are also very young. Suspicious.
ggplot(df, aes(age, avg_glucose_level)) +
geom_point(aes(color = smoking_status), alpha = 0.6, size = 1) +
scale_fill_brewer(palette = "Set1", direction = -1) +
facet_grid(rows = vars(stroke), labeller = as_labeller(facet_names))

OBS! Kids mostly have ‘Unknown’ smoking status; both target groups split into two clusters along glucose level, and I’m curious why.
It is not gender, heart disease, or any other factor we have in the data set.
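One way to probe the two clusters: split on an eyeballed glucose threshold and cross-tabulate against the factors (a sketch; the 150 mg/dL split point is my rough guess from the plot):
df %>%
  mutate(glucose_cluster = if_else(avg_glucose_level > 150, "high", "low")) %>%
  count(glucose_cluster, gender, heart_disease)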
ggplot(df, aes(smoking_status, age)) +
geom_boxplot(aes(fill = stroke), alpha = 0.5, varwidth = T, notch = T) +
scale_fill_brewer(palette = "Set1", direction = -1) +
xlab("")Kids are mainly non-smokers of course. Note the same two outliers of age below 20 in stroke-yes
ggplot(df, aes(avg_glucose_level, bmi)) +
geom_point(aes(color = age), alpha = 0.6, size = 1) +
scale_fill_brewer(palette = "Set1", direction = -1) +
facet_grid(rows = vars(stroke), labeller = as_labeller(facet_names))

BMI outliers: super high BMI but super low glucose levels? How is that possible?
Could it be a bias introduced by testing-protocol misuse, e.g. glucose measured after eating instead of before in some samples?
gender <- df %>% group_by(stroke, gender) %>% summarize(N=n())
ggplot(gender, aes(stroke, N)) +
geom_bar(aes(fill=gender), alpha = 0.8, stat = "identity", position = "fill") +
scale_fill_brewer(palette = "Set2", direction = -1) +
ylab("proportion")Proportions in both stroke groups are roughly the same
hyptens <- df %>% group_by(stroke, hypertension) %>% summarize(N = n())
ggplot(hyptens, aes(stroke, N)) +
geom_bar(aes(fill = hypertension), alpha = 0.8, stat = "identity", position = "fill") +
scale_fill_brewer(palette = "Set2", direction = -1) +
ylab("proportion")Hypertension occurred more often in stroke-yes
heart <- df %>% group_by(stroke, heart_disease) %>% summarize(N=n())
ggplot(heart, aes(stroke, N)) +
geom_bar(aes(fill = heart_disease), alpha = 0.8, stat = "identity", position = "fill") +
scale_fill_brewer(palette = "Set2", direction = 1) +
ylab("proportion")married <- df %>% group_by(stroke, ever_married) %>% summarize(N=n())
ggplot(married, aes(stroke, N)) +
geom_bar(aes(fill = ever_married), alpha = 0.8, stat = "identity", position = "fill") +
scale_fill_brewer(palette = "Set2", direction = -1) +
ylab("proportion")Marriage is bad :)
work <- df %>% group_by(stroke, work_type) %>% summarize(N=n())
ggplot(work, aes(stroke, N)) +
geom_bar(aes(fill = work_type), alpha = 0.8, stat = "identity", position = "fill") +
scale_fill_brewer(palette = "Set2", direction = 1) +
ylab("proportion")It’s good to be a child
residence <- df %>% group_by(stroke, Residence_type) %>% summarize(N=n())
ggplot(residence, aes(stroke, N)) +
geom_bar(aes(fill = Residence_type), alpha = 0.8, stat = "identity", position = "fill") +
scale_fill_brewer(palette = "Set2", direction = 1) +
ylab("proportion")smoking <- df %>% group_by(stroke, smoking_status) %>% summarize(N=n())
ggplot(smoking, aes(stroke, N)) +
geom_bar(aes(fill = smoking_status), alpha = 0.8, stat = "identity", position = "fill") +
scale_fill_brewer(palette = "Set2", direction = 1) +
ylab("proportion")A lot of NAs in smoking_status comes from group
‘Children’ (see work_type). I can replace them with ‘never
smoked’.
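A minimal sketch of that replacement (hypothetical name df_kids_never; not applied in the analysis below):
df_kids_never <- df %>%
  mutate(smoking_status = factor(
    if_else(work_type == "children" & is.na(smoking_status),
            "never smoked",
            as.character(smoking_status)),
    levels = levels(df$smoking_status)))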
df %>% filter(work_type == "children") %>%
group_by(smoking_status) %>%
summarise(N = n(),
avg.age = mean(age),
max.age = max(age),
min.age = min(age))

There are several suspicious outliers (in BMI and glucose alike). I can safely remove BMI > 75, maybe even BMI > 60 (remember that BMI > 40 is already the highest class of obesity).
What is less safe is removing non-adults (younger than 20). On one hand they provide valid information (age is a very important predictor); on the other hand their stroke cases are really suspicious, and many predictors make no sense (or are obvious NAs) for non-adults (smoking, marital status, employment type, residence type, etc.). Model-based imputation of smoking_status in children makes no sense either; I should rather replace it with ‘never smoked’.
Since modelling with all predictors and observations gave me very moderate results (TPR = 1 comes with a very high FPR at a probability cutoff close to 0), I will try various trimmings of the data.
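For reference, the adult-only trim discussed above would be a one-liner (a sketch; not used in what follows):
df_adults <- df %>% filter(age >= 20)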
Remove bmi > 60
df_trim <- df %>% filter(bmi <= 60)

skimr::skim(df_trim)

| Name | df_trim |
|---|---|
| Number of rows | 4895 |
| Number of columns | 11 |
| _______________________ | |
| Column type frequency: | |
| factor | 8 |
| numeric | 3 |
| ________________________ | |
| Group variables | None |
Variable type: factor
| skim_variable | n_missing | complete_rate | ordered | n_unique | top_counts |
|---|---|---|---|---|---|
| gender | 0 | 1.0 | FALSE | 2 | Fem: 2888, Mal: 2007, Oth: 0 |
| hypertension | 0 | 1.0 | FALSE | 2 | 0: 4449, 1: 446 |
| heart_disease | 0 | 1.0 | FALSE | 2 | 0: 4652, 1: 243 |
| ever_married | 0 | 1.0 | FALSE | 2 | Yes: 3193, No: 1702 |
| work_type | 0 | 1.0 | FALSE | 5 | Pri: 2798, Sel: 774, chi: 671, Gov: 630 |
| Residence_type | 0 | 1.0 | FALSE | 2 | Urb: 2485, Rur: 2410 |
| smoking_status | 1479 | 0.7 | FALSE | 3 | nev: 1847, for: 836, smo: 733 |
| stroke | 0 | 1.0 | FALSE | 2 | no: 4686, yes: 209 |
Variable type: numeric
| skim_variable | n_missing | complete_rate | mean | sd | p0 | p25 | p50 | p75 | p100 | hist |
|---|---|---|---|---|---|---|---|---|---|---|
| age | 0 | 1 | 42.87 | 22.57 | 0.08 | 25.00 | 44.00 | 60.00 | 82.00 | ▅▆▇▇▆ |
| avg_glucose_level | 0 | 1 | 105.31 | 44.42 | 55.12 | 77.08 | 91.68 | 113.46 | 271.74 | ▇▃▁▁▁ |
| bmi | 0 | 1 | 28.79 | 7.56 | 10.30 | 23.50 | 28.00 | 33.00 | 59.70 | ▂▇▅▁▁ |
BMI is now complete: filter() also drops the rows where bmi is NA, so in total 214 observations are gone (201 bmi NAs plus 13 rows with bmi > 60).
Imputation using package mice.
It uses polr (a proportional odds model) for smoking_status and pmm (predictive mean matching) for bmi; since bmi is already complete after trimming, only smoking_status actually gets imputed, as the log below shows.
library(mice)
imp_mice <- mice(df_trim)

##
## iter imp variable
## 1 1 smoking_status
## 1 2 smoking_status
## 1 3 smoking_status
## 1 4 smoking_status
## 1 5 smoking_status
## 2 1 smoking_status
## 2 2 smoking_status
## 2 3 smoking_status
## 2 4 smoking_status
## 2 5 smoking_status
## 3 1 smoking_status
## 3 2 smoking_status
## 3 3 smoking_status
## 3 4 smoking_status
## 3 5 smoking_status
## 4 1 smoking_status
## 4 2 smoking_status
## 4 3 smoking_status
## 4 4 smoking_status
## 4 5 smoking_status
## 5 1 smoking_status
## 5 2 smoking_status
## 5 3 smoking_status
## 5 4 smoking_status
## 5 5 smoking_status
df_imp <- complete(imp_mice)

Number of NAs in BMI: 0
Number of NAs in Smoking: 0
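The methods mice actually picked can be checked directly (a sketch; empty strings mark columns that needed no imputation):
imp_mice$method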
bmi_imp_comp <- bind_rows(
  select(df_trim, bmi, stroke) %>% mutate(type = "original"),
  select(df_imp, bmi, stroke) %>% mutate(type = "imputed"))
ggplot(bmi_imp_comp, aes(bmi)) +
geom_histogram(aes(fill = type), alpha = 0.8) +
facet_grid(cols = vars(stroke))

The distributions match, as expected: after trimming, bmi had no NAs left to impute.
smoke_imp_comp <- bind_rows(
  select(df_trim, smoking_status, stroke) %>% mutate(type = "original"),
  select(df_imp, smoking_status, stroke) %>% mutate(type = "imputed"))
ggplot(smoke_imp_comp, aes(smoking_status)) +
geom_bar(aes(fill=type), alpha=0.8, position="dodge") +
facet_grid(cols = vars(stroke)) +
xlab("")+
theme(axis.text.x = element_text(angle = 45, vjust = 0.5))

Counts increased roughly proportionally in all smoking groups.
Scale the numeric features (age, avg_glucose_level, bmi).
# use caret::preProcess()
# preProcValues <- preProcess(training, method = c("center", "scale"))
df_scaled <- df_imp %>%
select(avg_glucose_level, age, bmi) %>%
scale() %>%
data.frame()

Dummify the multi-level factors (gender, work_type, Residence_type, smoking_status) with caret::dummyVars().
# select vars
to_dum <- df_imp %>% select(gender, work_type, Residence_type, smoking_status)
# make a dummyVars object (dummyVars comes from caret)
library(caret)
dummies <- dummyVars(~ ., data = to_dum)
# apply it
df_dummy <- data.frame(predict(dummies, newdata = to_dum))
head(df_dummy)

df_proc <- bind_cols(df_scaled, df_dummy, select(df_trim, hypertension, heart_disease, ever_married, stroke))
head(df_proc)

ROC optimization works better when the data is imbalanced:
# for ROC
fit_ctrl_roc <- trainControl(## 5-fold CV
method = "repeatedcv",
number = 5,
repeats = 10,
allowParallel = T,
classProbs = T,
summaryFunction = twoClassSummary)

The data is imbalanced, so use SMOTE to build the training set; never resample the test set.
set.seed(1234)
sample_set <- createDataPartition(y = df_proc$stroke, p = .75, list = FALSE)
df_train <- df_proc[sample_set,]
df_test <- df_proc[-sample_set,]
# DMwR::SMOTE for imbalanced data: perc.over = 1725 and perc.under = 106 give me roughly a 1:1 ratio
library(DMwR)  # note: DMwR has since been archived on CRAN
df_train_smote <- SMOTE(stroke ~ ., data.frame(df_train), perc.over = 1725, perc.under = 106)
df_train_smote %>% group_by(stroke) %>% summarise(N = n())

set.seed(122)
library(doParallel)
cl <- makePSOCKcluster(THREADS)
registerDoParallel(cl)
fit_rf <- train(stroke ~ .,
data = df_train_smote,
metric = "ROC",
method = "rf",
trControl = fit_ctrl_roc,
tuneGrid = expand.grid(.mtry = seq(2, 19, 1)),
verbosity = 0,
ntree = 10,
nodesize = 1,
verbose = FALSE)
stopCluster(cl)
fit_rf

## Random Forest
##
## 6735 samples
## 20 predictor
## 2 classes: 'no', 'yes'
##
## No pre-processing
## Resampling: Cross-Validated (5 fold, repeated 10 times)
## Summary of sample sizes: 5388, 5387, 5388, 5389, 5388, 5387, ...
## Resampling results across tuning parameters:
##
## mtry ROC Sens Spec
## 2 0.9832741 0.9780347 0.9307463
## 3 0.9854463 0.9877120 0.9384113
## 4 0.9868466 0.9886314 0.9421850
## 5 0.9872561 0.9886319 0.9434624
## 6 0.9878625 0.9883648 0.9456307
## 7 0.9881608 0.9874154 0.9453634
## 8 0.9885465 0.9864355 0.9459281
## 9 0.9884783 0.9848321 0.9462544
## 10 0.9887124 0.9848027 0.9457792
## 11 0.9883291 0.9834667 0.9458980
## 12 0.9882832 0.9827547 0.9453341
## 13 0.9882701 0.9821608 0.9447099
## 14 0.9886392 0.9820725 0.9441754
## 15 0.9878338 0.9818044 0.9449181
## 16 0.9878753 0.9804394 0.9441156
## 17 0.9877343 0.9801425 0.9430463
## 18 0.9875217 0.9800827 0.9430454
## 19 0.9874509 0.9788657 0.9422436
##
## ROC was used to select the optimal model using the largest value.
## The final value used for the model was mtry = 10.
imp_vars_rf <- varImp(fit_rf)
plot(imp_vars_rf, main = "Variable Importance with RF")

A helper function for the ROC computations; prediction() and performance() come from ROCR:
library(ROCR)
get_roc <- function(fit.obj, testing.df){
pred_prob <- predict.train(fit.obj, newdata = testing.df, type = "prob")
pred_roc <- prediction(predictions = pred_prob$yes, labels = testing.df$stroke)
perf_roc <- performance(pred_roc, measure = "tpr", x.measure = "fpr")
return(list(perf_roc, pred_roc))
}

# calculate ROC
perf_pred <- get_roc(fit_rf, df_test)
perf_rf <- perf_pred[[1]]
pred_rf <- perf_pred[[2]]
# take AUC
auc_rf <- round(unlist(slot(performance(pred_rf, measure = "auc"), "y.values")), 3)
# plot
plot(perf_rf, main = "Random Forest ROC curve", col = "steelblue", lwd = 3)
abline(a = 0, b = 1, lwd = 3, lty = 2, col = 1)
legend(x = 0.7, y = 0.3, legend = paste0("AUC = ", auc_rf))

So, we can adjust the TPR/FPR cutoff to predict all stroke cases.
At which probability cutoff do we get TPR = 1.0?
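Instead of reading it off the plot, the cutoff can be pulled from the ROCR performance object (a sketch):
tpr_curve <- performance(pred_rf, measure = "tpr", x.measure = "cutoff")
cutoffs <- unlist(slot(tpr_curve, "x.values"))
tpr_vals <- unlist(slot(tpr_curve, "y.values"))
max(cutoffs[tpr_vals == 1])  # largest cutoff that still yields TPR = 1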
# use pred_rf (pred_roc) object
plot(performance(pred_rf, measure = "tpr", x.measure = "cutoff"),
col = "steelblue",
ylab = "Rate",
xlab = "Probability cutoff")
plot(performance(pred_rf, measure = "fpr", x.measure = "cutoff"),
add = T, col = "red")
legend(x = 0.65,y = 0.7, c("TPR (Recall)", "FPR (1-Spec)"),
lty = 1, col = c('steelblue', 'red'), bty = 'n', cex = 1, lwd = 2)
#abline(v = 0.02, lwd = 2, lty=6)
title("RF")Using desired cut-off: we want to maximize TPR (Sensitivity, Recall)!
According to the TPR/FPR plot (above) the optimal cutoff is.
# predict probabilities
pred_prob_rf <- predict(fit_rf, newdata = df_test, type = "prob")
# choose your cut-off
cutoff = 0.11
# turn probabilities into classes
pred_class_rf <- ifelse(pred_prob_rf$yes > cutoff, "yes", "no")
pred_class_rf <- as.factor(pred_class_rf)
cm_rf <- confusionMatrix(data = pred_class_rf,
reference = df_test$stroke,
mode = "everything",
positive = "yes")
cm_rf

## Confusion Matrix and Statistics
##
## Reference
## Prediction no yes
## no 953 22
## yes 262 40
##
## Accuracy : 0.7776
## 95% CI : (0.7538, 0.8001)
## No Information Rate : 0.9514
## P-Value [Acc > NIR] : 1
##
## Kappa : 0.1514
##
## Mcnemar's Test P-Value : <2e-16
##
## Sensitivity : 0.64516
## Specificity : 0.78436
## Pos Pred Value : 0.13245
## Neg Pred Value : 0.97744
## Precision : 0.13245
## Recall : 0.64516
## F1 : 0.21978
## Prevalence : 0.04855
## Detection Rate : 0.03132
## Detection Prevalence : 0.23649
## Balanced Accuracy : 0.71476
##
## 'Positive' Class : yes
##
set.seed(122)
#cl <- makePSOCKcluster(THREADS)
#registerDoParallel(cl)
fit_adb <- train(stroke ~ .,
data = df_train_smote,
metric = "ROC",
method = "AdaBoost.M1",
trControl = fit_ctrl_roc,
tuneLength = 10,
verbosity = 0,
verbose = FALSE)
# coeflearn = Freund was chosen by the automatic grid search; the mfinal choice comes from there too
# stop the cluster (commented out here: the cluster creation above is commented out as well)
#stopCluster(cl)
fit_adb

# calculate ROC
perf_pred_adb <- get_roc(fit_adb, df_test)
perf_adb <- perf_pred_adb[[1]]
pred_adb <- perf_pred_adb[[2]]
# take AUC
auc_adb <- round(unlist(slot(performance(pred_adb, measure = "auc"), "y.values")), 3)
# plot
plot(perf_adb, main = "AdaBoost ROC curve", col = "steelblue", lwd = 3)
abline(a = 0, b = 1, lwd = 3, lty = 2, col = 1)
legend(x = 0.7, y = 0.3, legend = paste0("AUC = ", auc_adb))

At which probability cutoff do we get TPR = 1.0?

# use the pred_adb object
plot(performance(pred_adb, measure = "tpr", x.measure = "cutoff"),
col="steelblue",
ylab = "Rate",
xlab="Probability cutoff")
plot(performance(pred_adb, measure = "fpr", x.measure = "cutoff"),
add = T, col = "red")
legend(x = 0.6,y = 0.7, c("TPR (Recall)", "FPR (1-Spec)"),
lty = 1, col =c('steelblue', 'red'), bty = 'n', cex = 1, lwd = 2)
#abline(v = 0.1, lwd = 2, lty=6)
title("AdaBoost.M1")pred_prob_adb <- predict(fit_adb, newdata = df_test, type = "prob")
# choose your cut-off
cutoff = 0.11
# turn probabilities into classes
pred_class_adb <- ifelse(pred_prob_adb$yes > cutoff, "yes", "no")
pred_class_adb <- as.factor(pred_class_adb)
cm_adb <- confusionMatrix(data = pred_class_adb,
reference = df_test$stroke,
mode = "everything",
positive = "yes")
cm_adb

xgbTree has 7 tuning parameters.
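caret can list them directly (a sketch):
caret::modelLookup("xgbTree")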
set.seed(121)
fit_xgb <- train(stroke ~ .,
data = df_train_smote,
method = "xgbTree",
metric = "ROC",
trControl = fit_ctrl_roc,
tuneLength = 4,
nthread = 16,  # xgboost's thread parameter is nthread, not nthreads
verbose = FALSE,
verbosity = 0)
fit_xgb$bestTune

imp_vars_xgb <- varImp(fit_xgb)
plot(imp_vars_xgb, main = "Variable Importance with XGB")

# calculate ROC
perf_pred_xgb <- get_roc(fit_xgb, df_test)
perf_xgb <- perf_pred_xgb[[1]]
pred_xgb <- perf_pred_xgb[[2]]
# take AUC
auc_xgb <- round(unlist(slot(performance(pred_xgb, measure = "auc"), "y.values")), 3)
# plot
plot(perf_xgb, main = "XGB ROC curve", col = "steelblue", lwd = 3)
abline(a = 0, b = 1, lwd = 3, lty = 2, col = 1)
legend(x = 0.7, y = 0.3, legend = paste0("AUC = ", auc_xgb))

# use the pred_xgb object
plot(performance(pred_xgb, measure = "tpr", x.measure = "cutoff"),
col = "steelblue",
ylab = "Rate",
xlab = "Probability cutoff")
plot(performance(pred_xgb, measure = "fpr", x.measure = "cutoff"),
add = T, col = "red")
legend(x = 0.6,y = 0.7, c("TPR (Recall)", "FPR (1-Spec)"),
lty = 1, col = c('steelblue', 'red'), bty = 'n', cex = 1, lwd = 2)
#abline(v = 0.1, lwd = 2, lty=6)
title("xgbTree")pred_prob_xgb <- predict(fit_xgb, newdata=df_test, type = "prob")
# choose your cut-off
cutoff = 0.12
# turn probabilities into classes
pred_class_xgb <- ifelse(pred_prob_xgb$yes > cutoff, "yes", "no")
pred_class_xgb <- as.factor(pred_class_xgb)
cm_xgb <- confusionMatrix(data = pred_class_xgb,
reference = df_test$stroke,
mode = "everything",
positive = "yes")
cm_xgb

save.image("data/workspace.RData")